From e00e5ed29911716f03a2dc20622921abc6059aa8 Mon Sep 17 00:00:00 2001 From: "kaf24@scramble.cl.cam.ac.uk" Date: Thu, 17 Jul 2003 16:31:05 +0000 Subject: [PATCH] bitkeeper revision 1.361 (3f16cf49R1T-lTlNN9i48cmqUfzLEQ) smpboot.c, setup.c, nmi.c, apic.c: Fix NMI watchdog to work properly on Hyperthread processors. --- xen/arch/i386/apic.c | 4 +- xen/arch/i386/nmi.c | 128 +++++++++++++++++++++++++++------------- xen/arch/i386/setup.c | 51 +++++++++++++--- xen/arch/i386/smpboot.c | 3 + 4 files changed, 134 insertions(+), 52 deletions(-) diff --git a/xen/arch/i386/apic.c b/xen/arch/i386/apic.c index 11d0c54532..7710eaa803 100644 --- a/xen/arch/i386/apic.c +++ b/xen/arch/i386/apic.c @@ -354,8 +354,8 @@ void __init setup_local_APIC (void) printk("No ESR for 82489DX.\n"); } - if (nmi_watchdog == NMI_LOCAL_APIC) - setup_apic_nmi_watchdog(); + if ( (smp_processor_id() == 0) && (nmi_watchdog == NMI_LOCAL_APIC) ) + setup_apic_nmi_watchdog(); } diff --git a/xen/arch/i386/nmi.c b/xen/arch/i386/nmi.c index f48efb2322..7062e56865 100644 --- a/xen/arch/i386/nmi.c +++ b/xen/arch/i386/nmi.c @@ -9,6 +9,7 @@ * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog. * Mikael Pettersson : Power Management for local APIC NMI watchdog. * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog. 
+ * Keir Fraser : Pentium 4 Hyperthreading support */ #include @@ -28,10 +29,13 @@ #include unsigned int nmi_watchdog = NMI_NONE; +unsigned int watchdog_on = 0; static unsigned int nmi_hz = HZ; unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ extern void show_registers(struct pt_regs *regs); +extern int logical_proc_id[]; + #define K7_EVNTSEL_ENABLE (1 << 22) #define K7_EVNTSEL_INT (1 << 20) #define K7_EVNTSEL_OS (1 << 17) @@ -52,25 +56,37 @@ extern void show_registers(struct pt_regs *regs); #define MSR_P4_PERFCTR0 0x300 #define MSR_P4_CCCR0 0x360 #define P4_ESCR_EVENT_SELECT(N) ((N)<<25) -#define P4_ESCR_OS (1<<3) -#define P4_ESCR_USR (1<<2) -#define P4_CCCR_OVF_PMI (1<<26) +#define P4_ESCR_OS0 (1<<3) +#define P4_ESCR_USR0 (1<<2) +#define P4_ESCR_OS1 (1<<1) +#define P4_ESCR_USR1 (1<<0) +#define P4_CCCR_OVF_PMI0 (1<<26) +#define P4_CCCR_OVF_PMI1 (1<<27) #define P4_CCCR_THRESHOLD(N) ((N)<<20) #define P4_CCCR_COMPLEMENT (1<<19) #define P4_CCCR_COMPARE (1<<18) #define P4_CCCR_REQUIRED (3<<16) #define P4_CCCR_ESCR_SELECT(N) ((N)<<13) #define P4_CCCR_ENABLE (1<<12) -/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter - CRU_ESCR0 (with any non-null event selector) through a complemented - max threshold. [IA32-Vol3, Section 14.9.9] */ +/* + * Set up IQ_COUNTER{0,1} to behave like a clock, by having IQ_CCCR{0,1} filter + * CRU_ESCR0 (with any non-null event selector) through a complemented + * max threshold. [IA32-Vol3, Section 14.9.9] + */ #define MSR_P4_IQ_COUNTER0 0x30C +#define MSR_P4_IQ_COUNTER1 0x30D #define MSR_P4_IQ_CCCR0 0x36C -#define MSR_P4_CRU_ESCR0 0x3B8 -#define P4_NMI_CRU_ESCR0 (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR) +#define MSR_P4_IQ_CCCR1 0x36D +#define MSR_P4_CRU_ESCR0 0x3B8 /* ESCR no. 
4 */ +#define P4_NMI_CRU_ESCR0 \ + (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS0|P4_ESCR_USR0| \ + P4_ESCR_OS1|P4_ESCR_USR1) #define P4_NMI_IQ_CCCR0 \ - (P4_CCCR_OVF_PMI|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ - P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) + (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ + P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) +#define P4_NMI_IQ_CCCR1 \ + (P4_CCCR_OVF_PMI1|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ + P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) int __init check_nmi_watchdog (void) { @@ -92,7 +108,7 @@ int __init check_nmi_watchdog (void) for (j = 0; j < smp_num_cpus; j++) { cpu = cpu_logical_map(j); if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) - printk("CPU#%d: NMI stuck? (Hyperthread secondary CPU?)\n", cpu); + printk("CPU#%d: NMI stuck?\n", cpu); else printk("CPU#%d: NMI okay\n", cpu); } @@ -175,23 +191,39 @@ static int __pminit setup_p4_watchdog(void) nmi_perfctr_msr = MSR_P4_IQ_COUNTER0; - if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) - clear_msr_range(0x3F1, 2); - /* MSR 0x3F0 seems to have a default value of 0xFC00, but current - docs doesn't fully define it, so leave it alone for now. 
*/ - clear_msr_range(0x3A0, 31); - clear_msr_range(0x3C0, 6); - clear_msr_range(0x3C8, 6); - clear_msr_range(0x3E0, 2); - clear_msr_range(MSR_P4_CCCR0, 18); - clear_msr_range(MSR_P4_PERFCTR0, 18); - - wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); - wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); - Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000)); - wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1); - apic_write(APIC_LVTPC, APIC_DM_NMI); - wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0); + if ( logical_proc_id[smp_processor_id()] == 0 ) + { + if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) + clear_msr_range(0x3F1, 2); + /* MSR 0x3F0 seems to have a default value of 0xFC00, but current + docs doesn't fully define it, so leave it alone for now. */ + clear_msr_range(0x3A0, 31); + clear_msr_range(0x3C0, 6); + clear_msr_range(0x3C8, 6); + clear_msr_range(0x3E0, 2); + clear_msr_range(MSR_P4_CCCR0, 18); + clear_msr_range(MSR_P4_PERFCTR0, 18); + + wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); + wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); + Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000)); + wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1); + apic_write(APIC_LVTPC, APIC_DM_NMI); + wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0); + } + else if ( logical_proc_id[smp_processor_id()] == 1 ) + { + wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1 & ~P4_CCCR_ENABLE, 0); + Dprintk("setting P4_IQ_COUNTER2 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000)); + wrmsr(MSR_P4_IQ_COUNTER1, -(cpu_khz/nmi_hz*1000), -1); + apic_write(APIC_LVTPC, APIC_DM_NMI); + wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1, 0); + } + else + { + return 0; + } + return 1; } @@ -246,8 +278,9 @@ void nmi_watchdog_tick (struct pt_regs * regs) int sum, cpu = smp_processor_id(); sum = apic_timer_irqs[cpu]; - - if (last_irq_sums[cpu] == sum) { + + if ( (last_irq_sums[cpu] == sum) && watchdog_on ) + { /* * Ayiee, looks like this CPU is stuck ... 
wait a few IRQs (5 seconds) * before doing the oops ... @@ -257,22 +290,33 @@ void nmi_watchdog_tick (struct pt_regs * regs) console_lock = SPIN_LOCK_UNLOCKED; die("NMI Watchdog detected LOCKUP on CPU", regs, cpu); } - } else { + } + else + { last_irq_sums[cpu] = sum; alert_counter[cpu] = 0; } - if (nmi_perfctr_msr) { - if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) { - /* - * P4 quirks: - An overflown perfctr will assert its interrupt - * until the OVF flag in its CCCR is cleared. - LVTPC is masked - * on interrupt and must be - * unmasked by the LVTPC handler. - */ - wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0); - apic_write(APIC_LVTPC, APIC_DM_NMI); + if ( nmi_perfctr_msr ) + { + if ( nmi_perfctr_msr == MSR_P4_IQ_COUNTER0 ) + { + if ( logical_proc_id[cpu] == 0 ) + { + wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0); + apic_write(APIC_LVTPC, APIC_DM_NMI); + wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1); + } + else + { + wrmsr(MSR_P4_IQ_CCCR1, P4_NMI_IQ_CCCR1, 0); + apic_write(APIC_LVTPC, APIC_DM_NMI); + wrmsr(MSR_P4_IQ_COUNTER1, -(cpu_khz/nmi_hz*1000), -1); + } + } + else + { + wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1); } - wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1); } } diff --git a/xen/arch/i386/setup.c b/xen/arch/i386/setup.c index 1e3695f847..521f135d48 100644 --- a/xen/arch/i386/setup.c +++ b/xen/arch/i386/setup.c @@ -25,6 +25,9 @@ struct task_struct *idle_task[NR_CPUS] = { &idle0_task }; /* for asm/domain_page.h, map_domain_page() */ unsigned long *mapcache[NR_CPUS]; +int phys_proc_id[NR_CPUS]; +int logical_proc_id[NR_CPUS]; + /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) { @@ -79,9 +82,45 @@ void __init get_cpu_vendor(struct cpuinfo_x86 *c) static void __init init_intel(struct cpuinfo_x86 *c) { + extern int opt_noht, opt_noacpi; + /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */ if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 ) clear_bit(X86_FEATURE_SEP, 
&c->x86_capability); + + if ( opt_noht ) + { + opt_noacpi = 1; /* Virtual CPUs only appear in ACPI tables. */ + clear_bit(X86_FEATURE_HT, &c->x86_capability[0]); + } + +#ifdef CONFIG_SMP + if ( test_bit(X86_FEATURE_HT, &c->x86_capability) ) + { + u32 eax, ebx, ecx, edx; + int initial_apic_id, siblings, cpu = smp_processor_id(); + + cpuid(1, &eax, &ebx, &ecx, &edx); + siblings = (ebx & 0xff0000) >> 16; + + if ( siblings <= 1 ) + { + printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu); + } + else if ( siblings > 2 ) + { + panic("We don't support more than two logical CPUs per package!"); + } + else + { + initial_apic_id = ebx >> 24 & 0xff; + phys_proc_id[cpu] = initial_apic_id >> 1; + logical_proc_id[cpu] = initial_apic_id & 1; + printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n", + cpu, phys_proc_id[cpu], logical_proc_id[cpu]); + } + } +#endif } static void __init init_amd(struct cpuinfo_x86 *c) @@ -104,10 +143,12 @@ static void __init init_amd(struct cpuinfo_x86 *c) */ void __init identify_cpu(struct cpuinfo_x86 *c) { - extern int opt_noht, opt_noacpi; - int junk, i; + int junk, i, cpu = smp_processor_id(); u32 xlvl, tfms; + phys_proc_id[cpu] = cpu; + logical_proc_id[cpu] = 0; + c->x86_vendor = X86_VENDOR_UNKNOWN; c->cpuid_level = -1; /* CPUID not detected */ c->x86_model = c->x86_mask = 0; /* So far unknown... */ @@ -166,12 +207,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c) panic("Only support Intel processors (P6+)\n"); } - if ( opt_noht ) - { - opt_noacpi = 1; /* Virtual CPUs only appear in ACPI tables. 
*/ - clear_bit(X86_FEATURE_HT, &c->x86_capability[0]); - } - printk("CPU caps: %08x %08x %08x %08x\n", c->x86_capability[0], c->x86_capability[1], diff --git a/xen/arch/i386/smpboot.c b/xen/arch/i386/smpboot.c index 5d387e6677..2ebab30c72 100644 --- a/xen/arch/i386/smpboot.c +++ b/xen/arch/i386/smpboot.c @@ -381,6 +381,9 @@ void __init smp_callin(void) */ smp_store_cpu_info(cpuid); + if (nmi_watchdog == NMI_LOCAL_APIC) + setup_apic_nmi_watchdog(); + /* * Allow the master to continue. */ -- 2.30.2